Basic commands in TensorFlow


In [7]:
from __future__ import print_function
import tensorflow as tf

In [3]:
#Basic interactive session

# Start a TensorFlow session in interactive mode, which installs itself
# as the default session so .run()/.eval() work without a context manager.
sess = tf.InteractiveSession()

# One trainable variable and one constant tensor.
v = tf.Variable([1.0, 2.0])
c = tf.constant([3.0, 3.0])

# Run the variable's initializer op so 'v' holds its initial value.
v.initializer.run()

# Build an op computing v - c, evaluate it, and show the result.
diff = tf.sub(v, c)
print(diff.eval())
# ==> [-2. -1.]

# Release the session's resources when finished.
sess.close()


[-2. -1.]

Simple linear model in an interactive session


In [4]:
# Get some data
# Download (if not cached) and load MNIST with one-hot encoded labels.
# NOTE(review): hardcoded absolute path — adjust for your environment.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/home/ubuntu/data/training/image/mnist', one_hot=True)


Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting /tmp/MNIST_data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting /tmp/MNIST_data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting /tmp/MNIST_data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting /tmp/MNIST_data/t10k-labels-idx1-ubyte.gz

In [9]:
# Interactive session to train a simple softmax regression model on MNIST.
import tensorflow as tf
import numpy as np

# Start interactive session
sess = tf.InteractiveSession()

# Declare input placeholders: flattened 28x28 images and one-hot labels.
x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])

#Trainable variables: weights and biases, zero-initialized.
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))

#Model: softmax over a single linear layer.
y_pred = tf.nn.softmax(tf.matmul(x,W) + b)

# Loss: mean cross-entropy over the batch.
# NOTE(review): taking log of an explicit softmax is numerically unstable;
# tf.nn.softmax_cross_entropy_with_logits is the preferred formulation.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_pred), reduction_indices=[1]))

# Trainer: plain gradient descent with learning rate 0.5.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)


#Loop to train the model. 30 batches of 500 cases each.
sess.run(tf.initialize_all_variables())
for i in range(30):
    batch = mnist.train.next_batch(500)
    train_step.run(feed_dict={x: batch[0], y: batch[1]})
    # Print the loss on the current batch to watch training progress.
    print(i, ' - ',cross_entropy.eval(feed_dict={x: batch[0], y: batch[1]}))


0  -  1.81275
1  -  1.60889
2  -  1.30185
3  -  1.24295
4  -  1.06077
5  -  1.02548
6  -  0.837058
7  -  0.813003
8  -  0.772033
9  -  0.794147
10  -  0.708654
11  -  0.657701
12  -  0.60068
13  -  0.668502
14  -  0.660577
15  -  0.734497
16  -  0.689952
17  -  0.687497
18  -  0.709138
19  -  0.654925
20  -  0.704622
21  -  0.547344
22  -  0.595498
23  -  0.440265
24  -  0.497913
25  -  0.601578
26  -  0.51206
27  -  0.505472
28  -  0.510493
29  -  0.418059

In [11]:
#Evaluate variables

# Evaluate trainable variables: the bias vector and the largest weight.
print(b.eval())
print(np.max(W.eval()))

# Evaluate results variables
# NOTE: 'y' is a placeholder, so evaluating it simply echoes the one-hot
# labels fed in; 'batch' is the last mini-batch from the training loop above.
print(y.eval(feed_dict={x: batch[0], y: batch[1]}))


[-0.06907193  0.135791   -0.0294337  -0.04222807  0.04624839  0.073547
  0.00773067  0.06736173 -0.17107926 -0.01886572]
0.295497
[[ 0.  0.  0. ...,  0.  0.  1.]
 [ 0.  1.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 ..., 
 [ 0.  1.  0. ...,  0.  0.  0.]
 [ 0.  0.  1. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]]

In [12]:
# Close the Session when we're done, releasing its resources.
sess.close()

In [ ]:


In [ ]:


In [ ]:
#Basic usage in batch mode

# Define a graph
graph = tf.Graph()
with graph.as_default():
    # graph definition goes here (placeholders, variables, optimizer, ...)
    pass  # BUG FIX: an empty 'with' body is a SyntaxError; 'pass' keeps this template valid


# Execute a graph to train a network.
# NOTE(review): nEpochs, batch_list, optimizer, param1_in and param2_in are
# template placeholders — they must be defined in the graph section above
# before this cell can actually run.
with tf.Session(graph=graph) as session:
    print('Initializing')
    tf.initialize_all_variables().run()
    for epoch in range(nEpochs):
        for batch in batch_list:
            feedDict = {}  # dictionary of batch data to run the graph
            _, param1_out, param2_out = session.run([optimizer, param1_in, param2_in], feed_dict=feedDict)


# Execute a graph to score data

In [ ]:
#SELECT DEVICE
# Pin the ops created inside this context to the first CPU.
with tf.device('/cpu:0'):
    # Include here the graph operations for the CPU.
    pass  # BUG FIX: an empty 'with' body is a SyntaxError; 'pass' keeps this template valid


# Creates a session with log_device_placement set to True, so the chosen
# device for every op is logged when the session is created.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))


# LIMIT THE MEMORY OF THE GPU
# Assume that you have 12GB of GPU memory and want to allocate ~4GB:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)

sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

In [ ]:
# List of variables saved in a model file
# NOTE(review): hardcoded absolute path — adjust for your environment.
path_model = '/home/jorge/data/tesis/handwriting/p05_ctc/IAM_corleone_first_model/'
# NewCheckpointReader inspects checkpoint contents without rebuilding a graph.
reader = tf.train.NewCheckpointReader(path_model + "modelCTC_original_images_01_epoch_95.ckpt")
# debug_string() returns bytes listing each saved tensor's name, dtype and shape.
print(reader.debug_string().decode("utf-8"))

In [ ]:

Load and save models


In [14]:
# Create and save model

import tensorflow as tf

#Load data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/MNIST_data', one_hot=True)


sess = tf.InteractiveSession()

# Define graph: a single-layer softmax classifier over flattened 28x28 images.
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y = tf.placeholder(tf.float32, shape=[None, 10], name='y')

W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))


#Prediction
y_pred = tf.nn.softmax(tf.matmul(x,W) + b, name='y_pred')

#Loss
# NOTE(review): log of an explicit softmax is numerically unstable;
# tf.nn.softmax_cross_entropy_with_logits is the preferred formulation.
cross_entropy = -tf.reduce_sum(y*tf.log(y_pred), name='cross_entropy')

# Train graph
train_step = tf.train.GradientDescentOptimizer(0.01, name='train_step').minimize(cross_entropy)


# Inicialize graph vars and train for 100 mini-batches of 50 examples.
sess.run(tf.initialize_all_variables())
for i in range(100):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y: batch[1]})

# Predict and evaluate
correct_prediction = tf.equal(tf.argmax(y_pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='Accuracy')

print('Accuracy test', accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}))


# Add to the collections the ops that the later notebook cells retrieve with
# tf.get_collection(). BUG FIX: these calls were commented out, so the
# "continue training", "score" and "evaluate" cells below failed with an
# IndexError on the empty collection. Register the union of everything needed:

# - For train: all the placeholders and the train_step
tf.add_to_collection('x', x)
tf.add_to_collection('y', y)
tf.add_to_collection('train_step', train_step)

# - For score: the x placeholder (already added) and y_pred
tf.add_to_collection('y_pred', y_pred)

# - For validation: loss & accuracy
tf.add_to_collection('cross_entropy', cross_entropy)
tf.add_to_collection('accuracy', accuracy)


# Create a saver and save weights (max_to_keep=0 keeps every checkpoint).
saver = tf.train.Saver(max_to_keep=0)
saver.save(sess, '/tmp/my-model')


#Close session
sess.close()


Extracting /tmp/MNIST_data/train-images-idx3-ubyte.gz
Extracting /tmp/MNIST_data/train-labels-idx1-ubyte.gz
Extracting /tmp/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting /tmp/MNIST_data/t10k-labels-idx1-ubyte.gz
Accuracy test 0.8834

In [1]:
# Continue training a model

import tensorflow as tf

#Load data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/MNIST_data', one_hot=True)

sess = tf.InteractiveSession()

#Load model: rebuild the graph from the .meta file and restore the weights.
new_saver = tf.train.import_meta_graph('/tmp/my-model.meta')
new_saver.restore(sess, '/tmp/my-model')

#Load vars from the saved collections.
# BUG FIX: these two lines were commented out, so the feed_dict below raised
# NameError on a fresh kernel. They require 'x'/'y' to have been registered
# with tf.add_to_collection before the model was saved.
x = tf.get_collection('x')[0]
y = tf.get_collection('y')[0]

#Continue training
train_step = tf.get_collection('train_step')[0]
# Fetched once here — the original re-fetched it on every loop iteration.
accuracy = tf.get_collection('accuracy')[0]
for i in range(900):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y: batch[1]})

print('Accuracy test', accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}))

sess.close()


Extracting /home/jorge/data/training/tensorflow/MNIST_data/train-images-idx3-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/train-labels-idx1-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/t10k-labels-idx1-ubyte.gz
Tensor("Accuracy:0", shape=(), dtype=float32)
('Accuracy test', 0.90740007)

In [1]:
# Score new data with a previously saved model.
import tensorflow as tf

#Load data
# NOTE(review): hardcoded absolute path — adjust for your environment.
data_path = '/home/jorge/data/training/tensorflow/'
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(data_path + 'MNIST_data', one_hot=True)

sess = tf.InteractiveSession()

#Load model: rebuild the graph from the .meta file and restore the weights.
new_saver = tf.train.import_meta_graph('/tmp/my-model.meta')
new_saver.restore(sess, '/tmp/my-model')

#Load vars from the saved collections.
# NOTE(review): requires tf.add_to_collection('x', ...) and ('y_pred', ...)
# to have been executed before saving; in the save cell above those calls
# are commented out, so this lookup raises IndexError unless re-enabled.
x = tf.get_collection('x')[0]
y_pred = tf.get_collection('y_pred')[0]

# Score the first two test images: prints their class-probability vectors.
print('Prediction test', y_pred.eval(feed_dict={x: mnist.test.images[0:2]}))

sess.close()


Extracting /home/jorge/data/training/tensorflow/MNIST_data/train-images-idx3-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/train-labels-idx1-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/t10k-labels-idx1-ubyte.gz
('Prediction test', array([[  6.45731576e-04,   1.06451043e-05,   3.88225482e-04,
          2.63259769e-03,   3.83650011e-04,   2.99013918e-04,
          1.43197049e-05,   9.90874648e-01,   3.23611312e-04,
          4.42757038e-03],
       [  1.20471008e-01,   9.97607596e-04,   6.55485213e-01,
          2.02858914e-02,   3.41765963e-06,   4.09900211e-02,
          1.40440777e-01,   3.72032150e-06,   2.12676339e-02,
          5.46845840e-05]], dtype=float32))

In [1]:
# Evaluate model: restore a saved model and compute loss/accuracy on test data.

import tensorflow as tf

#Load data
# NOTE(review): hardcoded absolute path — adjust for your environment.
data_path = '/home/jorge/data/training/tensorflow/'
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(data_path + 'MNIST_data', one_hot=True)

sess = tf.InteractiveSession()

#Load model: rebuild the graph from the .meta file and restore the weights.
new_saver = tf.train.import_meta_graph('/tmp/my-model.meta')
new_saver.restore(sess, '/tmp/my-model')

#Load vars from the saved collections.
# NOTE(review): requires the 'x'/'y'/'accuracy'/'cross_entropy' collection
# registrations before saving; in the save cell above those calls are
# commented out, so these lookups raise IndexError unless re-enabled.
x = tf.get_collection('x')[0]
y = tf.get_collection('y')[0]

accuracy = tf.get_collection('accuracy')[0]
cross_entropy = tf.get_collection('cross_entropy')[0]
print('cross_entropy test', cross_entropy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}))
print('Accuracy test', accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}))

sess.close()


Extracting /home/jorge/data/training/tensorflow/MNIST_data/train-images-idx3-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/train-labels-idx1-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting /home/jorge/data/training/tensorflow/MNIST_data/t10k-labels-idx1-ubyte.gz
('cross_entropy test', 4099.9136)
('Accuracy test', 0.88340008)

In [ ]:

Save a model as a frozen .pb file


In [ ]:
# Freeze the current graph (variables converted to constants) into one .pb file.
from tensorflow.python.framework import graph_util  # BUG FIX: graph_util was never imported

sess = tf.InteractiveSession()

### create some graph here ###
##############################

graph_def = sess.graph.as_graph_def()
output_node_names = "output0,output1"  # put the names of the output nodes here
output_graph_file = "/tmp/frozen_graph.pb"  # BUG FIX: was undefined; destination path for the frozen graph

# freeze all parameters and save
output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph_def, output_node_names.split(","))
with tf.gfile.GFile(output_graph_file, "wb") as f:
    f.write(output_graph_def.SerializeToString())

TensorFlow Model Benchmark Tool

1. Build the binary: $ bazel build -c opt tensorflow/tools/benchmark:benchmark_model

2. Run it on your compute graph: $ bazel-bin/tensorflow/tools/benchmark/benchmark_model --graph=tensorflow_inception_graph.pb --input_layer="input:0" --input_layer_shape="1,224,224,3" --input_layer_type="float" --output_layer="output:0"


In [ ]:


In [ ]: